In [35]:
from __future__ import division
from stats import *
import random
from sgd import *
from probability import *

In [2]:
# Outlier-cleaned book data: friend counts and minutes-per-day on the site,
# paired by index (len(num_friends_good) == len(daily_minutes_good)).
num_friends_good = [49,41,40,25,21,21,19,19,18,18,16,15,15,15,15,14,14,13,13,13,13,12,12,11,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
daily_minutes_good = [68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59,49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57,31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94,32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47,30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17,25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73,24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59,39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77,20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42,29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55,14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35,28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42,17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84]

In [3]:
# work backwards
def predict(alpha, beta, x_i):
    """Simple linear model: predicted y for input x_i is alpha + beta * x_i."""
    return alpha + beta * x_i

def error(alpha, beta, x_i, y_i):
    """Residual for one data point: actual y_i minus the model's prediction."""
    prediction = predict(alpha, beta, x_i)
    return y_i - prediction

def sum_of_squared_error(alpha, beta, x, y):
    """Total squared residual of the line (alpha, beta) over paired data.

    alpha, beta -- intercept and slope of the model
    x, y        -- equal-length sequences of inputs and targets
    Returns the sum of error(...)**2 over zip(x, y); empty input gives 0.
    """
    # generator expression instead of a throwaway list inside sum()
    return sum(error(alpha, beta, x_i, y_i) ** 2
               for x_i, y_i in zip(x, y))

def least_squares_fit(x, y):
    """Return (alpha, beta) minimizing the sum of squared errors.

    Closed form: slope = correlation(x, y) * sigma_y / sigma_x, and the
    intercept makes the line pass through the point of means.
    """
    slope = correlation(x, y) * standard_deviation(y) / standard_deviation(x)
    intercept = mean(y) - slope * mean(x)
    return intercept, slope

In [4]:
# Fit the simple one-variable regression; last expression displays the pair.
alpha, beta = least_squares_fit(num_friends_good, daily_minutes_good)
alpha, beta


Out[4]:
(22.94755241346903, 0.903865945605865)

In [5]:
# Metrics

def total_sum_of_squares(y):
    """Total squared deviation of y from its mean (SS_tot)."""
    deviations = de_mean(y)
    return sum(deviation ** 2 for deviation in deviations)

def r_squared(alpha, beta, x, y):
    """Fraction of the variation in y captured by the model (R^2)."""
    residual_ss = sum_of_squared_error(alpha, beta, x, y)
    total_ss = total_sum_of_squares(y)
    return 1.0 - residual_ss / total_ss

In [6]:
r_squared(alpha, beta, num_friends_good, daily_minutes_good)


Out[6]:
0.32910783778363073

In [7]:
# Use GD
# try theta = [alpha, beta]

def squared_error(x_i, y_i, theta):
    """Squared residual for one point, with theta packed as [alpha, beta]."""
    alpha, beta = theta
    residual = error(alpha, beta, x_i, y_i)
    return residual ** 2

def squared_error_gradient(x_i, y_i, theta):
    """Gradient of the squared error w.r.t. theta = [alpha, beta].

    Returns [d/d_alpha, d/d_beta] for a single data point.
    """
    alpha, beta = theta
    err = error(alpha, beta, x_i, y_i)  # compute the residual once, not twice

    return [-2 * err,        # alpha partial
            -2 * err * x_i]  # beta partial

In [8]:
# Fit the same simple model by stochastic gradient descent.
# Seed first so the random starting theta (and the result) is reproducible.
random.seed(0)

theta = [random.random(), random.random()]

# 0.0001 is the initial step size; minimize_stochastic comes from sgd
alpha, beta = minimize_stochastic(squared_error,
                                 squared_error_gradient,
                                 num_friends_good,
                                 daily_minutes_good,
                                 theta,
                                 0.0001)

In [9]:
alpha, beta


Out[9]:
(22.932129109466857, 0.9053515055087813)

In [10]:
# look ma! more variables!
def predict(x_i, beta):
    """Dot product of feature vector x_i and coefficient vector beta.

    NOTE: redefines the earlier two-parameter predict.  In this notebook
    each x_i begins with a constant 1, so beta[0] acts as the intercept.
    """
    return dot(x_i, beta)

def error(x_i, y_i, beta):
    """Residual for the multiple-regression model: y_i - dot(x_i, beta).
    (Shadows the earlier four-parameter error.)"""
    return y_i - predict(x_i, beta)

def squared_error(x_i, y_i, beta):
    """Squared residual for one observation under coefficients beta."""
    residual = error(x_i, y_i, beta)
    return residual * residual

def squared_error_gradient(x_i, y_i, beta):
    """Gradient of the squared error w.r.t. beta: -2 * residual * x_ij per
    coefficient.

    Hoists the residual out of the comprehension so error() is evaluated
    once per point instead of once per coefficient.
    """
    err = error(x_i, y_i, beta)
    return [-2 * x_ij * err for x_ij in x_i]

def estimate_beta(x, y):
    """Fit multiple-regression coefficients by stochastic gradient descent.

    x -- list of feature vectors (one coefficient per component of x[0])
    y -- list of targets
    Returns the fitted coefficient vector.  Uses the global `random` state
    for the starting point, so seed beforehand for reproducibility.
    """
    # fixed typo: 'beta_intial' -> 'beta_initial' (matches estimate_beta_ridge)
    beta_initial = [random.random() for x_i in x[0]]

    return minimize_stochastic(squared_error,
                               squared_error_gradient,
                               x,
                               y,
                               beta_initial,
                               0.001)

In [18]:
# Feature matrix: each row is [1 (constant term), num_friends, work_hours,
# has_phd] -- NOTE(review): column meanings inferred from the book's example;
# confirm against the data source.
x = [[1,49,4,0],[1,41,9,0],[1,40,8,0],[1,25,6,0],[1,21,1,0],[1,21,0,0],[1,19,3,0],[1,19,0,0],[1,18,9,0],[1,18,8,0],[1,16,4,0],[1,15,3,0],[1,15,0,0],[1,15,2,0],[1,15,7,0],[1,14,0,0],[1,14,1,0],[1,13,1,0],[1,13,7,0],[1,13,4,0],[1,13,2,0],[1,12,5,0],[1,12,0,0],[1,11,9,0],[1,10,9,0],[1,10,1,0],[1,10,1,0],[1,10,7,0],[1,10,9,0],[1,10,1,0],[1,10,6,0],[1,10,6,0],[1,10,8,0],[1,10,10,0],[1,10,6,0],[1,10,0,0],[1,10,5,0],[1,10,3,0],[1,10,4,0],[1,9,9,0],[1,9,9,0],[1,9,0,0],[1,9,0,0],[1,9,6,0],[1,9,10,0],[1,9,8,0],[1,9,5,0],[1,9,2,0],[1,9,9,0],[1,9,10,0],[1,9,7,0],[1,9,2,0],[1,9,0,0],[1,9,4,0],[1,9,6,0],[1,9,4,0],[1,9,7,0],[1,8,3,0],[1,8,2,0],[1,8,4,0],[1,8,9,0],[1,8,2,0],[1,8,3,0],[1,8,5,0],[1,8,8,0],[1,8,0,0],[1,8,9,0],[1,8,10,0],[1,8,5,0],[1,8,5,0],[1,7,5,0],[1,7,5,0],[1,7,0,0],[1,7,2,0],[1,7,8,0],[1,7,10,0],[1,7,5,0],[1,7,3,0],[1,7,3,0],[1,7,6,0],[1,7,7,0],[1,7,7,0],[1,7,9,0],[1,7,3,0],[1,7,8,0],[1,6,4,0],[1,6,6,0],[1,6,4,0],[1,6,9,0],[1,6,0,0],[1,6,1,0],[1,6,4,0],[1,6,1,0],[1,6,0,0],[1,6,7,0],[1,6,0,0],[1,6,8,0],[1,6,4,0],[1,6,2,1],[1,6,1,1],[1,6,3,1],[1,6,6,1],[1,6,4,1],[1,6,4,1],[1,6,1,1],[1,6,3,1],[1,6,4,1],[1,5,1,1],[1,5,9,1],[1,5,4,1],[1,5,6,1],[1,5,4,1],[1,5,4,1],[1,5,10,1],[1,5,5,1],[1,5,2,1],[1,5,4,1],[1,5,4,1],[1,5,9,1],[1,5,3,1],[1,5,10,1],[1,5,2,1],[1,5,2,1],[1,5,9,1],[1,4,8,1],[1,4,6,1],[1,4,0,1],[1,4,10,1],[1,4,5,1],[1,4,10,1],[1,4,9,1],[1,4,1,1],[1,4,4,1],[1,4,4,1],[1,4,0,1],[1,4,3,1],[1,4,1,1],[1,4,3,1],[1,4,2,1],[1,4,4,1],[1,4,4,1],[1,4,8,1],[1,4,2,1],[1,4,4,1],[1,3,2,1],[1,3,6,1],[1,3,4,1],[1,3,7,1],[1,3,4,1],[1,3,1,1],[1,3,10,1],[1,3,3,1],[1,3,4,1],[1,3,7,1],[1,3,5,1],[1,3,6,1],[1,3,1,1],[1,3,6,1],[1,3,10,1],[1,3,2,1],[1,3,4,1],[1,3,2,1],[1,3,1,1],[1,3,5,1],[1,2,4,1],[1,2,2,1],[1,2,8,1],[1,2,3,1],[1,2,1,1],[1,2,9,1],[1,2,10,1],[1,2,9,1],[1,2,4,1],[1,2,5,1],[1,2,0,1],[1,2,9,1],[1,2,9,1],[1,2,0,1],[1,2,1,1],[1,2,1,1],[1,2,4,1],[1,1,0,1],[1,1,2,1],[1,1,2,1],[1,1,5,1],[1,1,3,1],[1,1,10,1],[1,1,6,1],[1,1,0,1],[1,1,8,1],[1,1,6,1],[1,1,4,1],[1,1,9,1],[1,1,9,1],[1,
1,4,1],[1,1,2,1],[1,1,9,1],[1,1,0,1],[1,1,8,1],[1,1,6,1],[1,1,1,1],[1,1,1,1],[1,1,5,1]]
daily_minutes_good = [68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59,49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57,31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94,32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47,30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17,25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73,24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59,39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77,20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42,29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55,14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35,28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42,17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84]

In [19]:
# Seed so the SGD starting point (and therefore the fitted beta) is reproducible.
random.seed(0)
beta = estimate_beta(x, daily_minutes_good)

In [20]:
beta


Out[20]:
[30.625234786488353,
 0.9715448288696535,
 -1.8679272872032218,
 0.911456949921445]

Goodness of Fit


In [12]:
def multiple_r_squared(x, y, beta):
    """R^2 of the multiple-regression model with coefficients beta."""
    residual_ss = sum(error(x_i, y_i, beta) ** 2
                      for x_i, y_i in zip(x, y))
    return 1.0 - residual_ss / total_sum_of_squares(y)

In [21]:
# ~0.68 here: the extra variables roughly double the variance explained
# compared with the simple model's ~0.33 above.
multiple_r_squared(x, daily_minutes_good, beta)


Out[21]:
0.6800056770476126

In [22]:
def bootstrap_sample(data):
    """Draw len(data) elements from data uniformly at random, with
    replacement (a standard bootstrap resample)."""
    return [random.choice(data) for _ in range(len(data))]

def bootstrap_statistic(data, stats_fn, num_samples):
    """Evaluate stats_fn on num_samples bootstrap resamples of data,
    returning the list of results."""
    results = []
    for _ in range(num_samples):
        results.append(stats_fn(bootstrap_sample(data)))
    return results

In [23]:
# Two synthetic datasets: one tightly clustered around 100 ...
close_to_100 = [99.5 + random.random() for _ in range(101)]

# ... and one split between values near 0 and values near 200
# (with a single point near 100 in the middle).
far_from_100 = ([99.5 + random.random()] +
               [random.random() for _ in range(50)] +
               [200 + random.random() for _ in range(50)])

In [24]:
#bootstrap_statistic(far_from_100, median, 100)

In [25]:
def estimate_sample_beta(sample):
    """Fit beta on one bootstrap sample of (x_i, y_i) pairs."""
    # unzip: turn a list of (x_i, y_i) pairs back into (xs, ys)
    x_sample, y_sample = zip(*sample)
    return estimate_beta(x_sample, y_sample)

In [27]:
random.seed(0)

# materialize the pairs as a list: on Python 3, zip() returns a one-shot
# iterator, which random.choice rejects and which would be exhausted after
# the first resample anyway; list(...) is also fine on Python 2
bootstrap_betas = bootstrap_statistic(list(zip(x, daily_minutes_good)),
                                      estimate_sample_beta,
                                      100)

In [39]:
# One standard error per coefficient; derive the coefficient count from the
# fitted betas instead of hard-coding the magic number 4.
bootstrap_standard_errors = [standard_deviation([beta[i]
                                                for beta in bootstrap_betas])
                             for i in range(len(bootstrap_betas[0]))]

In [40]:
# Mean of each coefficient across the bootstrap fits; coefficient count is
# taken from the data rather than hard-coded as 4.
bootstrap_means = [mean([beta[i]
                         for beta in bootstrap_betas])
                   for i in range(len(bootstrap_betas[0]))]

In [41]:
bootstrap_means


Out[41]:
[30.59224849820316,
 0.9855971016543045,
 -1.8796521171952305,
 0.9888166983406425]

In [42]:
bootstrap_standard_errors


Out[42]:
[1.174097542924062,
 0.07861006463889537,
 0.13138388603694567,
 0.9899022849002838]

In [43]:
def p_value(beta_hat_j, sigma_hat_j):
    """Two-sided p-value for the coefficient estimate beta_hat_j with
    standard error sigma_hat_j, under a normal approximation."""
    z = beta_hat_j / sigma_hat_j
    if beta_hat_j > 0:
        # twice the probability of seeing a value at least this large
        return 2 * (1 - normal_cdf(z))
    # twice the probability of seeing a value at least this small
    return 2 * normal_cdf(z)

In [44]:
# p-value for each coefficient; only the last one fails to reach
# significance in this run
[p_value(b,s) for b,s in zip(bootstrap_means, bootstrap_standard_errors)]


Out[44]:
[0.0, 0.0, 0.0, 0.3178415182581884]

Regularization


In [45]:
def ridge_penalty(beta, alpha):
    """alpha times the squared L2 norm of beta, skipping the intercept
    term beta[0], which is not penalized."""
    return alpha * dot(beta[1:], beta[1:])

def squared_error_ridge(x_i, y_i, beta, alpha):
    """Squared error for one data point plus the ridge penalty on beta."""
    return error(x_i, y_i, beta) ** 2 + ridge_penalty(beta, alpha)

In [49]:
def ridge_penalty_gradient(beta, alpha):
    """Gradient of ridge_penalty w.r.t. beta: zero for the (unpenalized)
    intercept, 2 * alpha * beta_j for every other coefficient."""
    gradient = [0]
    for beta_j in beta[1:]:
        gradient.append(2 * alpha * beta_j)
    return gradient

def squared_error_ridge_gradient(x_i, y_i, beta, alpha):
    """Gradient of squared_error_ridge: the data-fit gradient plus the
    penalty gradient, added elementwise."""
    return vector_add(squared_error_gradient(x_i, y_i, beta),
                     ridge_penalty_gradient(beta, alpha))

def estimate_beta_ridge(x, y, alpha):
    """Fit ridge regression with penalty strength alpha via stochastic
    gradient descent.

    Starts from a random beta (seed the global `random` state first for
    reproducibility).  NOTE(review): `partial` is assumed to come from
    functools via one of the star imports above -- confirm it is in scope.
    """
    beta_initial = [random.random() for x_i in x[0]]
    return minimize_stochastic(partial(squared_error_ridge, alpha = alpha),
                              partial(squared_error_ridge_gradient, alpha = alpha),
                              x, y,
                              beta_initial,
                              0.001)

In [56]:
# Fit with increasing regularization strength; larger alpha should shrink
# the non-intercept coefficients toward zero.
random.seed(0)

beta_0 = estimate_beta_ridge(x, daily_minutes_good, alpha = 0.0)
beta_0_01 = estimate_beta_ridge(x, daily_minutes_good, alpha = 0.01)
beta_0_1 = estimate_beta_ridge(x, daily_minutes_good, alpha = 0.1)

In [60]:
beta_0


Out[60]:
[30.625234786488353,
 0.9715448288696535,
 -1.8679272872032218,
 0.911456949921445]

In [61]:
beta_0_01


Out[61]:
[30.528551175260006,
 0.9731181744366826,
 -1.851799008905609,
 0.8915225460735687]

In [62]:
beta_0_1


Out[62]:
[30.838757524394094,
 0.9526782736304008,
 -1.8460496669152224,
 0.5459697239685967]

In [63]:
def lasso_penalty(beta, alpha):
    """alpha times the L1 norm of beta, excluding the intercept beta[0]."""
    total = 0
    for beta_i in beta[1:]:
        total += abs(beta_i)
    return alpha * total

In [ ]: